Patch to run a domU in shadow test mode.

With CONFIG_XEN_SHADOW_MODE enabled, the guest kernel writes its page
tables directly instead of issuing explicit L1/L2 update and pin/unpin
hypercalls, relying on the hypervisor's shadow page tables to track the
changes. The domain builder leaves the guest's page-table pages writable
and, instead of pinning the L2 table, puts the new domain into shadow
test mode via xc_shadow_control(). The hypervisor side gets several
shadow-mode fixes (out-of-sync marking, relaxed UVMF_ALL flush
assertions, removal of a stale page-count check when freeing domain
memory), and the verbose shadow debugging options are turned off.
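For context, the toolstack side amounts to the xc_shadow_control() call
added to xc_linux_build() below. A minimal stand-alone sketch of the same
call follows; the header name and the error-handling convention are
assumptions (this sketch assumes libxc reports failure with a negative
return value):

    #include <stdio.h>
    #include "xc.h"    /* libxc interface header in this tree (assumed name) */

    /* Put a freshly built, still-paused domain into shadow test mode.
     * Returns 0 on success, -1 on failure. */
    static int enable_shadow_test(int xc_handle, u32 dom)
    {
        int rc = xc_shadow_control(xc_handle, dom,
                                   DOM0_SHADOW_CONTROL_OP_ENABLE_TEST,
                                   NULL, 0, NULL);
        if ( rc < 0 )
        {
            fprintf(stderr, "enabling shadow test mode failed (rc=%d)\n", rc);
            return -1;
        }
        return 0;
    }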
to a character device, allowing device prototyping in application
space. Odds are that you want to say N here.
+config XEN_SHADOW_MODE
+ bool "Fake shadow mode"
+ default n
+ help
+ Build the kernel to run in the hypervisor's shadow (test) mode:
+ page-table updates are written directly and the explicit
+ update/pin hypercalls are compiled out, leaving the hypervisor's
+ shadow page tables to track the changes. This is a testing
+ option; say N unless you are developing shadow mode.
+
+
config XEN_SCRUB_PAGES
bool "Scrub memory before freeing it to Xen"
default y
CONFIG_XEN_PHYSDEV_ACCESS=y
CONFIG_XEN_BLKDEV_BACKEND=y
# CONFIG_XEN_BLKDEV_TAP_BE is not set
+# CONFIG_XEN_BLKDEV_GRANT is not set
CONFIG_XEN_NETDEV_BACKEND=y
CONFIG_XEN_BLKDEV_FRONTEND=y
CONFIG_XEN_NETDEV_FRONTEND=y
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.11-xenU
-# Fri Mar 11 01:20:28 2005
+# Tue Apr 5 16:44:33 2005
#
CONFIG_XEN=y
CONFIG_ARCH_XEN=y
#
# CONFIG_XEN_PRIVILEGED_GUEST is not set
# CONFIG_XEN_PHYSDEV_ACCESS is not set
+# CONFIG_XEN_BLKDEV_GRANT is not set
CONFIG_XEN_BLKDEV_FRONTEND=y
CONFIG_XEN_NETDEV_FRONTEND=y
# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
# CONFIG_XEN_BLKDEV_TAP is not set
+CONFIG_XEN_SHADOW_MODE=y
CONFIG_XEN_SCRUB_PAGES=y
CONFIG_X86=y
# CONFIG_X86_64 is not set
#define pmd_val_ma(v) (v).pud.pgd.pgd;
#endif
+#ifndef CONFIG_XEN_SHADOW_MODE
void xen_l1_entry_update(pte_t *ptr, unsigned long val)
{
mmu_update_t u;
u.val = pmd_val_ma(val);
BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
}
+#endif
void xen_machphys_update(unsigned long mfn, unsigned long pfn)
{
#endif /* CONFIG_SMP */
+#ifndef CONFIG_XEN_SHADOW_MODE
void xen_pgd_pin(unsigned long ptr)
{
struct mmuext_op op;
op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
+#endif
void xen_set_ldt(unsigned long ptr, unsigned long len)
{
kmem_cache_free(pgd_cache, pgd);
}
+#ifndef CONFIG_XEN_SHADOW_MODE
void make_lowmem_page_readonly(void *va)
{
pgd_t *pgd = pgd_offset_k((unsigned long)va);
va = (void *)((unsigned long)va + PAGE_SIZE);
}
}
+#endif /* CONFIG_XEN_SHADOW_MODE */
*/
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
+
+#ifndef CONFIG_XEN_SHADOW_MODE
#define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval))
+#else
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+#endif
#define ptep_get_and_clear(xp) __pte_ma(xchg(&(xp)->pte_low, 0))
#define pte_same(a, b) ((a).pte_low == (b).pte_low)
} \
} while (0)
+#ifndef CONFIG_XEN_SHADOW_MODE
void make_lowmem_page_readonly(void *va);
void make_lowmem_page_writable(void *va);
void make_page_readonly(void *va);
void make_page_writable(void *va);
void make_pages_readonly(void *va, unsigned int nr);
void make_pages_writable(void *va, unsigned int nr);
+#else
+#define make_lowmem_page_readonly(_va) ((void)0)
+#define make_lowmem_page_writable(_va) ((void)0)
+#define make_page_readonly(_va) ((void)0)
+#define make_page_writable(_va) ((void)0)
+#define make_pages_readonly(_va, _nr) ((void)0)
+#define make_pages_writable(_va, _nr) ((void)0)
+#endif
#define arbitrary_virt_to_machine(__va) \
({ \
* be MACHINE addresses.
*/
-void xen_l1_entry_update(pte_t *ptr, unsigned long val);
-void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
void xen_pt_switch(unsigned long ptr);
void xen_tlb_flush(void);
void xen_invlpg(unsigned long ptr);
+
+#ifndef CONFIG_XEN_SHADOW_MODE
+void xen_l1_entry_update(pte_t *ptr, unsigned long val);
+void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
void xen_pgd_pin(unsigned long ptr);
void xen_pgd_unpin(unsigned long ptr);
void xen_pte_pin(unsigned long ptr);
void xen_pte_unpin(unsigned long ptr);
+#else
+#define xen_l1_entry_update(_p, _v) set_pte((_p), (pte_t){(_v)})
+#define xen_l2_entry_update(_p, _v) set_pgd((_p), (pgd_t){(_v)})
+#define xen_pgd_pin(_p) ((void)0)
+#define xen_pgd_unpin(_p) ((void)0)
+#define xen_pte_pin(_p) ((void)0)
+#define xen_pte_unpin(_p) ((void)0)
+#endif
+
void xen_set_ldt(unsigned long ptr, unsigned long bytes);
void xen_machphys_update(unsigned long mfn, unsigned long pfn);
}
*vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
+#if !(1 || defined(GROSS_HACK_TO_TEST_SHADOW_MODE_CLIENTS))
if ( (count >= ((vpt_start-dsi.v_start)>>PAGE_SHIFT)) &&
(count < ((vpt_end -dsi.v_start)>>PAGE_SHIFT)) )
*vl1e &= ~_PAGE_RW;
+#endif
vl1e++;
}
munmap(vl1tab, PAGE_SIZE);
}
munmap(physmap, PAGE_SIZE);
+#if 1 || defined(GROSS_HACK_TO_TEST_SHADOW_MODE_CLIENTS)
+ {
+ int ret;
+ ret = xc_shadow_control(xc_handle, dom,
+ DOM0_SHADOW_CONTROL_OP_ENABLE_TEST,
+ NULL, 0, NULL);
+ if ( ret < 0 )
+ ERROR("enabling shadow test mode failed\n");
+ }
+#endif
+
/*
* Pin down l2tab addr as page dir page - causes hypervisor to provide
* correct protection for the page
*/
+#if !(1 || defined(GROSS_HACK_TO_TEST_SHADOW_MODE_CLIENTS))
if ( pin_table(xc_handle, MMUEXT_PIN_L2_TABLE, l2tab>>PAGE_SHIFT, dom) )
goto error_out;
+#endif
start_info = xc_map_foreign_range(
xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
phys_basetab = c->pt_base;
ed->arch.guest_table = mk_pagetable(phys_basetab);
- if ( !get_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT], d,
- PGT_base_page_table) )
- return -EINVAL;
+ if ( shadow_mode_enabled(d) )
+ {
+ if ( !get_page(&frame_table[phys_basetab>>PAGE_SHIFT], d) )
+ return -EINVAL;
+ }
+ else
+ {
+ if ( !get_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT], d,
+ PGT_base_page_table) )
+ return -EINVAL;
+ }
/* Failure to set GDT is harmless. */
SET_GDT_ENTRIES(ed, DEFAULT_GDT_ENTRIES);
gpfn = __mfn_to_gpfn(d, mfn);
ASSERT(VALID_M2P(gpfn));
- if ( page_is_page_table(page) )
+ if ( page_is_page_table(page) &&
+ !page_out_of_sync(page) )
+ {
shadow_mark_mfn_out_of_sync(ed, gpfn, mfn);
+ }
}
*(unsigned long *)va = req.val;
local_flush_tlb();
break;
case UVMF_ALL:
- BUG_ON(shadow_mode_enabled(d));
+ BUG_ON(shadow_mode_enabled(d) && (d->cpuset != (1<<cpu)));
flush_tlb_mask(d->cpuset);
break;
default:
local_flush_tlb_one(va);
break;
case UVMF_ALL:
- BUG_ON(shadow_mode_enabled(d));
+ BUG_ON(shadow_mode_enabled(d) && (d->cpuset != (1<<cpu)));
flush_tlb_one_mask(d->cpuset, va);
break;
default:
if ( shadow_mode_log_dirty(dom) )
__mark_dirty(dom, mfn);
- if ( page_is_page_table(page) )
+ if ( page_is_page_table(page) && !page_out_of_sync(page) )
shadow_mark_mfn_out_of_sync(current, gpfn, mfn);
}
* e.g., You are expected to have paused the domain and synchronized CR3.
*/
- shadow_audit(d, 1);
-
if( !d->arch.shadow_ht ) return;
+ shadow_audit(d, 1);
+
// first, remove any outstanding refs from out_of_sync entries...
//
free_out_of_sync_state(d);
ASSERT(pfn_is_ram(mfn));
ASSERT((page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page);
- FSH_LOG("mark_mfn_out_of_sync(gpfn=%p, mfn=%p) c=%p t=%p",
+ FSH_LOG("%s(gpfn=%p, mfn=%p) c=%p t=%p", __func__,
gpfn, mfn, page->count_info, page->u.inuse.type_info);
// XXX this will require some more thought... Cross-domain sharing and
u32 count = 0;
ASSERT(spin_is_locked(&d->arch.shadow_lock));
+ perfc_incrc(remove_all_access);
for (i = 0; i < shadow_ht_buckets; i++)
{
unsigned long *gpl1e, *spl1e;
int errors = 0, oos_ptes = 0;
- // First check to see if this guest page is currently the active
- // PTWR page. If so, then we compare the (old) cached copy of the
- // guest page to the shadow, and not the currently writable (and
- // thus potentially out-of-sync) guest page.
- //
- if ( VM_ASSIST(d, VMASST_TYPE_writable_pagetables) )
- BUG();
-
if ( page_out_of_sync(pfn_to_page(gmfn)) )
{
gmfn = __shadow_status(d, gpfn, PGT_snapshot);
unsigned long ptbase_mfn = 0;
int errors = 0, limit, oos_pdes = 0;
- _audit_domain(d, AUDIT_QUIET);
+ //_audit_domain(d, AUDIT_QUIET);
shadow_lock(d);
sh_check_name = s;
if ( page_out_of_sync(page) )
__shadow_sync_mfn(d, mpfn + j);
shadow_remove_all_access(d, mpfn + j);
-
- if (page->count_info != 1)
- {
- printk("free_dom_mem in shadow mode didn't release page "
- "mfn=%p c=%p\n", mpfn+j, page->count_info);
- shadow_unlock(d);
- audit_domain(d);
- BUG();
- }
shadow_unlock(d);
}
#define SHADOW_SNAPSHOT_ELSEWHERE (-1L)
/************************************************************************/
-#define SHADOW_DEBUG 1
-#define SHADOW_VERBOSE_DEBUG 1
-#define SHADOW_VVERBOSE_DEBUG 1
-#define SHADOW_HASH_DEBUG 1
-#define FULLSHADOW_DEBUG 1
+#define SHADOW_DEBUG 0
+#define SHADOW_VERBOSE_DEBUG 0
+#define SHADOW_VVERBOSE_DEBUG 0
+#define SHADOW_HASH_DEBUG 0
+#define FULLSHADOW_DEBUG 0
#if SHADOW_DEBUG
extern int shadow_status_noswap;
static inline void update_pagetables(struct exec_domain *ed)
{
struct domain *d = ed->domain;
+ int paging_enabled;
#ifdef CONFIG_VMX
- int paging_enabled =
- !VMX_DOMAIN(ed) ||
- test_bit(VMX_CPU_STATE_PG_ENABLED, &ed->arch.arch_vmx.cpu_state);
-#else
- const int paging_enabled = 1;
+ if ( VMX_DOMAIN(ed) )
+ paging_enabled =
+ test_bit(VMX_CPU_STATE_PG_ENABLED, &ed->arch.arch_vmx.cpu_state);
+ else
#endif
+ // HACK ALERT: there's currently no easy way to figure out if a domU
+ // has set its arch.guest_table to zero, vs not yet initialized it.
+ //
+ paging_enabled = !!pagetable_val(ed->arch.guest_table);
/*
* We don't call __update_pagetables() when vmx guest paging is
PERFCOUNTER_CPU(validate_hl2e_changes, "validate_hl2e makes changes")
PERFCOUNTER_CPU(exception_fixed, "pre-exception fixed")
PERFCOUNTER_CPU(gpfn_to_mfn_foreign, "calls to gpfn_to_mfn_foreign")
+PERFCOUNTER_CPU(remove_all_access, "calls to remove_all_access")
PERFCOUNTER_CPU(remove_write_access, "calls to remove_write_access")
PERFCOUNTER_CPU(remove_write_access_easy, "easy outs of remove_write_access")
PERFCOUNTER_CPU(remove_write_no_work, "no work in remove_write_access")